[IA64] Add VTI related perfc
author: awilliam@xenbuild.aw <awilliam@xenbuild.aw>
Sun, 10 Sep 2006 20:31:54 +0000 (14:31 -0600)
committer: awilliam@xenbuild.aw <awilliam@xenbuild.aw>
Sun, 10 Sep 2006 20:31:54 +0000 (14:31 -0600)
This patch adds VTI-related and fw_hypercall performance counters.

Signed-off-by: Atsushi SAKAI <sakaia@jp.fujitsu.com>
Signed-off-by: Hiroya INAKOSHI <inakoshi.hiroya@jp.fujitsu.com>
xen/arch/ia64/vmx/mmio.c
xen/arch/ia64/vmx/pal_emul.c
xen/arch/ia64/vmx/vmx_interrupt.c
xen/arch/ia64/vmx/vmx_phy_mode.c
xen/arch/ia64/vmx/vmx_process.c
xen/arch/ia64/vmx/vmx_virt.c
xen/arch/ia64/xen/hypercall.c
xen/include/asm-ia64/perfc_defn.h

index 36cee507ec4088448b9e66bbdf1cdececc926e4a..95e7ec035129ef961fbd53b3bc446dd356af73cb 100644 (file)
@@ -213,6 +213,7 @@ static void mmio_access(VCPU *vcpu, u64 src_pa, u64 *dest, size_t s, int ma, int
     iot=__gpfn_is_io(vcpu->domain, src_pa>>PAGE_SHIFT);
     v_plat = vmx_vcpu_get_plat(vcpu);
 
+    perfc_incra(vmx_mmio_access, iot >> 56);
     switch (iot) {
     case GPFN_PIB:
         if(!dir)
index 4857dba00ab7b7b6f00b992a6917748fb92747a5..2c88fb34e2ab5bda92e5607eb294273a64b4b5ec 100644 (file)
@@ -389,6 +389,7 @@ pal_emul(VCPU *vcpu) {
 
        vcpu_get_gr_nat(vcpu,28,&gr28);  //bank1
 
+       perfc_incrc(vmx_pal_emul);
        switch (gr28) {
                case PAL_CACHE_FLUSH:
                        result = pal_cache_flush(vcpu);
index 8aec56fdfa15dbcf1bf0385374c3a514041e9f53..c1f6392c41c74fa829ff7e2fce3bdaafa37698cf 100644 (file)
@@ -92,6 +92,7 @@ inject_guest_interruption(VCPU *vcpu, u64 vec)
     u64 viva;
     REGS *regs;
     ISR pt_isr;
+    perfc_incra(vmx_inject_guest_interruption, vec >> 8);
     regs=vcpu_regs(vcpu);
     // clear cr.isr.ri 
     pt_isr.val = VMX(vcpu,cr_isr);
index 4d55c62bf677ea0b6538dbfcbfad980fc172ed19..8745721d54394c815331017c53a03c1edc65da60 100644 (file)
@@ -262,6 +262,7 @@ switch_mm_mode(VCPU *vcpu, IA64_PSR old_psr, IA64_PSR new_psr)
     int act;
     REGS * regs=vcpu_regs(vcpu);
     act = mm_switch_action(old_psr, new_psr);
+    perfc_incra(vmx_switch_mm_mode, act);
     switch (act) {
     case SW_V2P:
 //        printf("V -> P mode transition: (0x%lx -> 0x%lx)\n",
index b56d8e7665a5f44cf98e182da8edf5a7e97f2b32..7867fd399313937e649838239721090366996495 100644 (file)
@@ -115,6 +115,7 @@ vmx_ia64_handle_break (unsigned long ifa, struct pt_regs *regs, unsigned long is
     struct domain *d = current->domain;
     struct vcpu *v = current;
 
+    perfc_incrc(vmx_ia64_handle_break);
 #ifdef CRASH_DEBUG
     if ((iim == 0 || iim == CDB_BREAK_NUM) && !user_mode(regs) &&
         IS_VMM_ADDRESS(regs->cr_iip)) {
index 2fcfcf5bc8c17e21d92edd17c077033cb02a6904..6fcb37090a4b21dd93127539bf3e66fd4c13d8cd 100644 (file)
@@ -1398,120 +1398,159 @@ if ( (cause == 0xff && opcode == 0x1e000000000) || cause == 0 ) {
 
     switch(cause) {
     case EVENT_RSM:
+        perfc_incrc(vmx_rsm);
         status=vmx_emul_rsm(vcpu, inst);
         break;
     case EVENT_SSM:
+        perfc_incrc(vmx_ssm);
         status=vmx_emul_ssm(vcpu, inst);
         break;
     case EVENT_MOV_TO_PSR:
+        perfc_incrc(vmx_mov_to_psr);
         status=vmx_emul_mov_to_psr(vcpu, inst);
         break;
     case EVENT_MOV_FROM_PSR:
+        perfc_incrc(vmx_mov_from_psr);
         status=vmx_emul_mov_from_psr(vcpu, inst);
         break;
     case EVENT_MOV_FROM_CR:
+        perfc_incrc(vmx_mov_from_cr);
         status=vmx_emul_mov_from_cr(vcpu, inst);
         break;
     case EVENT_MOV_TO_CR:
+        perfc_incrc(vmx_mov_to_cr);
         status=vmx_emul_mov_to_cr(vcpu, inst);
         break;
     case EVENT_BSW_0:
+        perfc_incrc(vmx_bsw0);
         status=vmx_emul_bsw0(vcpu, inst);
         break;
     case EVENT_BSW_1:
+        perfc_incrc(vmx_bsw1);
         status=vmx_emul_bsw1(vcpu, inst);
         break;
     case EVENT_COVER:
+        perfc_incrc(vmx_cover);
         status=vmx_emul_cover(vcpu, inst);
         break;
     case EVENT_RFI:
+        perfc_incrc(vmx_rfi);
         status=vmx_emul_rfi(vcpu, inst);
         break;
     case EVENT_ITR_D:
+        perfc_incrc(vmx_itr_d);
         status=vmx_emul_itr_d(vcpu, inst);
         break;
     case EVENT_ITR_I:
+        perfc_incrc(vmx_itr_i);
         status=vmx_emul_itr_i(vcpu, inst);
         break;
     case EVENT_PTR_D:
+        perfc_incrc(vmx_ptr_d);
         status=vmx_emul_ptr_d(vcpu, inst);
         break;
     case EVENT_PTR_I:
+        perfc_incrc(vmx_ptr_i);
         status=vmx_emul_ptr_i(vcpu, inst);
         break;
     case EVENT_ITC_D:
+        perfc_incrc(vmx_itc_d);
         status=vmx_emul_itc_d(vcpu, inst);
         break;
     case EVENT_ITC_I:
+        perfc_incrc(vmx_itc_i);
         status=vmx_emul_itc_i(vcpu, inst);
         break;
     case EVENT_PTC_L:
+        perfc_incrc(vmx_ptc_l);
         status=vmx_emul_ptc_l(vcpu, inst);
         break;
     case EVENT_PTC_G:
+        perfc_incrc(vmx_ptc_g);
         status=vmx_emul_ptc_g(vcpu, inst);
         break;
     case EVENT_PTC_GA:
+        perfc_incrc(vmx_ptc_ga);
         status=vmx_emul_ptc_ga(vcpu, inst);
         break;
     case EVENT_PTC_E:
+        perfc_incrc(vmx_ptc_e);
         status=vmx_emul_ptc_e(vcpu, inst);
         break;
     case EVENT_MOV_TO_RR:
+        perfc_incrc(vmx_mov_to_rr);
         status=vmx_emul_mov_to_rr(vcpu, inst);
         break;
     case EVENT_MOV_FROM_RR:
+        perfc_incrc(vmx_mov_from_rr);
         status=vmx_emul_mov_from_rr(vcpu, inst);
         break;
     case EVENT_THASH:
+        perfc_incrc(vmx_thash);
         status=vmx_emul_thash(vcpu, inst);
         break;
     case EVENT_TTAG:
+        perfc_incrc(vmx_ttag);
         status=vmx_emul_ttag(vcpu, inst);
         break;
     case EVENT_TPA:
+        perfc_incrc(vmx_tpa);
         status=vmx_emul_tpa(vcpu, inst);
         break;
     case EVENT_TAK:
+        perfc_incrc(vmx_tak);
         status=vmx_emul_tak(vcpu, inst);
         break;
     case EVENT_MOV_TO_AR_IMM:
+        perfc_incrc(vmx_mov_to_ar_imm);
         status=vmx_emul_mov_to_ar_imm(vcpu, inst);
         break;
     case EVENT_MOV_TO_AR:
+        perfc_incrc(vmx_mov_to_ar_reg);
         status=vmx_emul_mov_to_ar_reg(vcpu, inst);
         break;
     case EVENT_MOV_FROM_AR:
+        perfc_incrc(vmx_mov_from_ar_reg);
         status=vmx_emul_mov_from_ar_reg(vcpu, inst);
         break;
     case EVENT_MOV_TO_DBR:
+        perfc_incrc(vmx_mov_to_dbr);
         status=vmx_emul_mov_to_dbr(vcpu, inst);
         break;
     case EVENT_MOV_TO_IBR:
+        perfc_incrc(vmx_mov_to_ibr);
         status=vmx_emul_mov_to_ibr(vcpu, inst);
         break;
     case EVENT_MOV_TO_PMC:
+        perfc_incrc(vmx_mov_to_pmc);
         status=vmx_emul_mov_to_pmc(vcpu, inst);
         break;
     case EVENT_MOV_TO_PMD:
+        perfc_incrc(vmx_mov_to_pmd);
         status=vmx_emul_mov_to_pmd(vcpu, inst);
         break;
     case EVENT_MOV_TO_PKR:
+        perfc_incrc(vmx_mov_to_pkr);
         status=vmx_emul_mov_to_pkr(vcpu, inst);
         break;
     case EVENT_MOV_FROM_DBR:
+        perfc_incrc(vmx_mov_from_dbr);
         status=vmx_emul_mov_from_dbr(vcpu, inst);
         break;
     case EVENT_MOV_FROM_IBR:
+        perfc_incrc(vmx_mov_from_ibr);
         status=vmx_emul_mov_from_ibr(vcpu, inst);
         break;
     case EVENT_MOV_FROM_PMC:
+        perfc_incrc(vmx_mov_from_pmc);
         status=vmx_emul_mov_from_pmc(vcpu, inst);
         break;
     case EVENT_MOV_FROM_PKR:
+        perfc_incrc(vmx_mov_from_pkr);
         status=vmx_emul_mov_from_pkr(vcpu, inst);
         break;
     case EVENT_MOV_FROM_CPUID:
+        perfc_incrc(vmx_mov_from_cpuid);
         status=vmx_emul_mov_from_cpuid(vcpu, inst);
         break;
     case EVENT_VMSW:
index 1f915bb532f5828045d384fa48251cd1a50a70f5..17ad53bd7127a14dae02951588d9f08b4e9a75c8 100644 (file)
@@ -211,6 +211,7 @@ fw_hypercall (struct pt_regs *regs)
        IA64FAULT fault; 
        unsigned long index = regs->r2 & FW_HYPERCALL_NUM_MASK_HIGH;
 
+       perfc_incra(fw_hypercall, index >> 8);
        switch (index) {
            case FW_HYPERCALL_PAL_CALL:
                //printf("*** PAL hypercall: index=%d\n",regs->r28);
index d3776611ffdea8fa9b39e433920999d23f89fce4..58d52a97ff2a6c5c688159ac307449bb93970e98 100644 (file)
@@ -35,6 +35,48 @@ PERFCOUNTER_ARRAY(mov_from_cr,        "privop mov from cr", 128)
 
 PERFCOUNTER_ARRAY(misc_privop,        "privop misc", 64)
 
+// privileged instructions to fall into vmx_entry
+PERFCOUNTER_CPU(vmx_rsm,              "vmx privop rsm")
+PERFCOUNTER_CPU(vmx_ssm,              "vmx privop ssm")
+PERFCOUNTER_CPU(vmx_mov_to_psr,       "vmx privop mov_to_psr")
+PERFCOUNTER_CPU(vmx_mov_from_psr,     "vmx privop mov_from_psr")
+PERFCOUNTER_CPU(vmx_mov_from_cr,      "vmx privop mov_from_cr")
+PERFCOUNTER_CPU(vmx_mov_to_cr,        "vmx privop mov_to_cr")
+PERFCOUNTER_CPU(vmx_bsw0,             "vmx privop bsw0")
+PERFCOUNTER_CPU(vmx_bsw1,             "vmx privop bsw1")
+PERFCOUNTER_CPU(vmx_cover,            "vmx privop cover")
+PERFCOUNTER_CPU(vmx_rfi,              "vmx privop rfi")
+PERFCOUNTER_CPU(vmx_itr_d,            "vmx privop itr_d")
+PERFCOUNTER_CPU(vmx_itr_i,            "vmx privop itr_i")
+PERFCOUNTER_CPU(vmx_ptr_d,            "vmx privop ptr_d")
+PERFCOUNTER_CPU(vmx_ptr_i,            "vmx privop ptr_i")
+PERFCOUNTER_CPU(vmx_itc_d,            "vmx privop itc_d")
+PERFCOUNTER_CPU(vmx_itc_i,            "vmx privop itc_i")
+PERFCOUNTER_CPU(vmx_ptc_l,            "vmx privop ptc_l")
+PERFCOUNTER_CPU(vmx_ptc_g,            "vmx privop ptc_g")
+PERFCOUNTER_CPU(vmx_ptc_ga,           "vmx privop ptc_ga")
+PERFCOUNTER_CPU(vmx_ptc_e,            "vmx privop ptc_e")
+PERFCOUNTER_CPU(vmx_mov_to_rr,        "vmx privop mov_to_rr")
+PERFCOUNTER_CPU(vmx_mov_from_rr,      "vmx privop mov_from_rr")
+PERFCOUNTER_CPU(vmx_thash,            "vmx privop thash")
+PERFCOUNTER_CPU(vmx_ttag,             "vmx privop ttag")
+PERFCOUNTER_CPU(vmx_tpa,              "vmx privop tpa")
+PERFCOUNTER_CPU(vmx_tak,              "vmx privop tak")
+PERFCOUNTER_CPU(vmx_mov_to_ar_imm,    "vmx privop mov_to_ar_imm")
+PERFCOUNTER_CPU(vmx_mov_to_ar_reg,    "vmx privop mov_to_ar_reg")
+PERFCOUNTER_CPU(vmx_mov_from_ar_reg,  "vmx privop mov_from_ar_reg")
+PERFCOUNTER_CPU(vmx_mov_to_dbr,       "vmx privop mov_to_dbr")
+PERFCOUNTER_CPU(vmx_mov_to_ibr,       "vmx privop mov_to_ibr")
+PERFCOUNTER_CPU(vmx_mov_to_pmc,       "vmx privop mov_to_pmc")
+PERFCOUNTER_CPU(vmx_mov_to_pmd,       "vmx privop mov_to_pmd")
+PERFCOUNTER_CPU(vmx_mov_to_pkr,       "vmx privop mov_to_pkr")
+PERFCOUNTER_CPU(vmx_mov_from_dbr,     "vmx privop mov_from_dbr")
+PERFCOUNTER_CPU(vmx_mov_from_ibr,     "vmx privop mov_from_ibr")
+PERFCOUNTER_CPU(vmx_mov_from_pmc,     "vmx privop mov_from_pmc")
+PERFCOUNTER_CPU(vmx_mov_from_pkr,     "vmx privop mov_from_pkr")
+PERFCOUNTER_CPU(vmx_mov_from_cpuid,   "vmx privop mov_from_cpuid")
+
+
 PERFCOUNTER_ARRAY(slow_hyperprivop,   "slow hyperprivops", HYPERPRIVOP_MAX + 1)
 PERFCOUNTER_ARRAY(fast_hyperprivop,   "fast hyperprivops", HYPERPRIVOP_MAX + 1)
 
@@ -44,6 +86,14 @@ PERFCOUNTER_ARRAY(fast_reflect,       "fast reflection", 0x80)
 PERFSTATUS(vhpt_nbr_entries,          "nbr of entries per VHPT")
 PERFSTATUS_CPU(vhpt_valid_entries,    "nbr of valid entries in VHPT")
 
+PERFCOUNTER_ARRAY(vmx_mmio_access,    "vmx_mmio_access", 8)
+PERFCOUNTER_CPU(vmx_pal_emul,         "vmx_pal_emul")
+PERFCOUNTER_ARRAY(vmx_switch_mm_mode, "vmx_switch_mm_mode", 8)
+PERFCOUNTER_CPU(vmx_ia64_handle_break,"vmx_ia64_handle_break")
+PERFCOUNTER_ARRAY(vmx_inject_guest_interruption,
+                                      "vmx_inject_guest_interruption", 0x80)
+PERFCOUNTER_ARRAY(fw_hypercall,       "fw_hypercall", 0x20)
+
 #ifdef CONFIG_PRIVOP_ADDRS
 #ifndef PERFPRIVOPADDR
 #define PERFPRIVOPADDR(name) \